struct task_struct {
- int processor;
- int state;
- int hyp_events;
- unsigned int domain;
+ /*
+ * DO NOT CHANGE THE ORDER OF THE FOLLOWING.
+ * Their offsets are hardcoded in entry.S
+ */
+
+ int processor; /* 00: current processor */
+ int state; /* 04: current run state */
+ int hyp_events; /* 08: pending events */
+ unsigned int domain; /* 12: domain id */
/* An unsafe pointer into a shared data area. */
- shared_info_t *shared_info;
+ shared_info_t *shared_info; /* 16: shared data area */
+
+ /*
+ * From here on things can be added and shuffled without special attention
+ */
struct list_head pg_head;
- unsigned int tot_pages; /* number of pages currently possesed */
- unsigned int max_pages; /* max number of pages that can be possesed */
+ unsigned int tot_pages; /* number of pages currently possessed */
+ unsigned int max_pages; /* max number of pages that can be possessed */
+ /* scheduling */
+ struct list_head run_list; /* the run list */
+ int has_cpu;
+ int policy;
+ int counter;
+
+ struct ac_timer blt; /* blocked timeout */
+
+ s_time_t lastschd; /* time this domain was last scheduled */
+ s_time_t cpu_time; /* total CPU time received till now */
+
+ long mcu_advance; /* inverse of weight */
+ u32 avt; /* actual virtual time */
+ u32 evt; /* effective virtual time */
+ long warp; /* virtual time warp */
+ long warpl; /* warp limit */
+ long warpu; /* unwarp time requirement */
+ long warped; /* time it ran warped last time */
+ long uwarped; /* time it ran unwarped last time */
+
+
/* Network I/O */
net_ring_t *net_ring_base;
net_vif_t *net_vif_list[MAX_GUEST_VIFS];
/* Block I/O */
blk_ring_t *blk_ring_base;
- unsigned int blk_req_cons; /* request consumer */
+ unsigned int blk_req_cons; /* request consumer */
+ unsigned int blk_resp_prod; /* (private version of) response producer */
struct list_head blkdev_list;
spinlock_t blk_ring_lock;
+ segment_t *segment_list[XEN_MAX_SEGMENTS]; /* vhd */
+ int segment_count;
- int has_cpu, policy, counter;
-
- struct list_head run_list;
-
+ /* VM */
struct mm_struct mm;
+ /* We need this lock to check page types and frob reference counts. */
+ spinlock_t page_lock;
+
mm_segment_t addr_limit; /* thread address space:
0-0xBFFFFFFF for user-thead
0-0xFFFFFFFF for kernel-thread